In this report, we extract information about published JOSS papers and generate graphics as well as a summary table that can be downloaded and used for further analyses.
suppressPackageStartupMessages({
library(tibble)
library(rcrossref)
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)
library(gh)
library(purrr)
library(jsonlite)
library(DT)
library(plotly)
library(citecorp)
library(readr)
})

## Keep track of the source of each column
source_track <- c()
## Determine whether to add a caption with today's date to the (non-interactive) plots
add_date_caption <- TRUE
if (add_date_caption) {
dcap <- lubridate::today()
} else {
dcap <- ""
}

## Read archived version of summary data frame, to use for filling in
## information about software repositories (due to limit on API requests)
## Sort by the date when software repo info was last obtained
papers_archive <- readRDS(gzcon(url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_analytics.rds?raw=true"))) %>%
dplyr::arrange(!is.na(repo_info_obtained), repo_info_obtained)
## Similarly for citation analysis, to avoid having to pull down the
## same information multiple times
citations_archive <- readr::read_delim(
url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_citations.tsv?raw=true"),
col_types = cols(.default = "c"), col_names = TRUE,
delim = "\t")We get the information about published JOSS papers from Crossref,
using the rcrossref R package. This package is also used to
extract citation counts.
## Fetch JOSS papers from Crossref
## Only 1000 papers can be pulled down at a time
lim <- 1000
papers <- rcrossref::cr_works(filter = c(issn = "2475-9066"),
limit = lim)$data
i <- 1
while (nrow(papers) == i * lim) {
papers <- dplyr::bind_rows(
papers,
rcrossref::cr_works(filter = c(issn = "2475-9066"),
limit = lim, offset = i * lim)$data)
i <- i + 1
}
papers <- papers %>%
dplyr::filter(type == "journal-article")
## A few papers don't have DOIs - generate them from the URL
noaltid <- which(is.na(papers$alternative.id))
papers$alternative.id[noaltid] <- gsub("http://dx.doi.org/", "",
papers$url[noaltid])
## Get citation info from Crossref and merge with paper details
cit <- rcrossref::cr_citation_count(doi = papers$alternative.id)
papers <- papers %>% dplyr::left_join(
cit %>% dplyr::rename(citation_count = count),
by = c("alternative.id" = "doi")
)

## Warning in dplyr::left_join(., cit %>% dplyr::rename(citation_count = count), : Each row in `x` is expected to match at most 1 row in `y`.
## ℹ Row 952 of `x` matches multiple rows.
## ℹ If multiple matches are expected, set `multiple = "all"` to silence this
## warning.
## Remove one duplicated paper
papers <- papers %>% dplyr::filter(alternative.id != "10.21105/joss.00688")
source_track <- c(source_track,
structure(rep("crossref", ncol(papers)),
names = colnames(papers)))

For each published paper, we use the Whedon API to get information about the pre-review and review issue numbers, the corresponding software repository, etc.
whedon <- list()
p <- 1
a <- jsonlite::fromJSON(
url(paste0("https://joss.theoj.org/papers/published.json?page=", p)),
simplifyDataFrame = FALSE
)
while (length(a) > 0) {
whedon <- c(whedon, a)
p <- p + 1
a <- tryCatch({
jsonlite::fromJSON(
url(paste0("https://joss.theoj.org/papers/published.json?page=", p)),
simplifyDataFrame = FALSE
)},
error = function(e) return(numeric(0))
)
}
whedon <- do.call(dplyr::bind_rows, lapply(whedon, function(w) {
data.frame(api_title = w$title,
api_state = w$state,
editor = paste(w$metadata$paper$editor, collapse = ","),
reviewers = paste(w$reviewers, collapse = ","),
nbr_reviewers = length(w$reviewers),
repo_url = w$repository_url,
review_issue_id = w$review_issue_id,
doi = w$doi,
prereview_issue_id = ifelse(!is.null(w$meta_review_issue_id),
w$meta_review_issue_id, NA_integer_),
languages = paste(w$metadata$paper$languages, collapse = ","),
archive_doi = w$metadata$paper$archive_doi)
}))
papers <- papers %>% dplyr::left_join(whedon, by = c("alternative.id" = "doi"))
source_track <- c(source_track,
structure(rep("whedon", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))

From each pre-review and review issue, we extract information about review times and assigned labels.
## Pull down info on all issues in the joss-reviews repository
issues <- gh("/repos/openjournals/joss-reviews/issues",
.limit = 5000, state = "all")## From each issue, extract required information
iss <- do.call(dplyr::bind_rows, lapply(issues, function(i) {
data.frame(title = i$title,
number = i$number,
state = i$state,
opened = i$created_at,
closed = ifelse(!is.null(i$closed_at),
i$closed_at, NA_character_),
ncomments = i$comments,
labels = paste(setdiff(
vapply(i$labels, getElement,
name = "name", character(1L)),
c("review", "pre-review", "query-scope", "paused")),
collapse = ","))
}))
## Split into REVIEW, PRE-REVIEW, and other issues (the latter category
## is discarded)
issother <- iss %>% dplyr::filter(!grepl("\\[PRE REVIEW\\]", title) &
!grepl("\\[REVIEW\\]", title))
dim(issother)
## [1] 133 7

head(issother)
## title number state opened
## 1 ## Review checklist for @mstimberg 5002 closed 2022-12-09T16:15:19Z
## 2 @editorialbot generate my checklist 4858 closed 2022-10-17T07:29:25Z
## 3 review 4806 closed 2022-09-28T08:35:30Z
## 4 Add policy to checklist 4682 closed 2022-08-19T07:36:27Z
## 5 @editorialbot generate my checklist 4609 closed 2022-07-26T10:29:57Z
## 6 @editorialbot commands 4608 closed 2022-07-26T10:21:06Z
## closed ncomments labels
## 1 2022-12-09T16:15:21Z 1
## 2 2022-10-17T07:29:27Z 4
## 3 2022-09-28T08:35:32Z 2
## 4 2022-08-26T08:27:00Z 0
## 5 2022-07-26T10:29:58Z 1
## 6 2022-07-26T10:21:07Z 1
## For REVIEW issues, generate the DOI of the paper from the issue number
getnbrzeros <- function(s) {
paste(rep(0, 5 - nchar(s)), collapse = "")
}
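## Quick illustration (issue numbers chosen arbitrarily for this example):
## the helper pads an issue number to five digits
getnbrzeros(295)    ## "00"  -> DOI 10.21105/joss.00295
getnbrzeros(12345)  ## ""    -> DOI 10.21105/joss.12345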
issrev <- iss %>% dplyr::filter(grepl("\\[REVIEW\\]", title)) %>%
dplyr::mutate(nbrzeros = purrr::map_chr(number, getnbrzeros)) %>%
dplyr::mutate(alternative.id = paste0("10.21105/joss.",
nbrzeros,
number)) %>%
dplyr::select(-nbrzeros) %>%
dplyr::mutate(title = gsub("\\[REVIEW\\]: ", "", title)) %>%
dplyr::rename_at(vars(-alternative.id), ~ paste0("review_", .))

## For pre-review and review issues, respectively, get the number of
## issues closed each month, and the number of those that have the
## 'rejected' label
review_rejected <- iss %>%
dplyr::filter(grepl("\\[REVIEW\\]", title)) %>%
dplyr::filter(!is.na(closed)) %>%
dplyr::mutate(closedmonth = lubridate::floor_date(as.Date(closed), "month")) %>%
dplyr::group_by(closedmonth) %>%
dplyr::summarize(nbr_issues_closed = length(labels),
nbr_rejections = sum(grepl("rejected", labels))) %>%
dplyr::mutate(itype = "review")
prereview_rejected <- iss %>%
dplyr::filter(grepl("\\[PRE REVIEW\\]", title)) %>%
dplyr::filter(!is.na(closed)) %>%
dplyr::mutate(closedmonth = lubridate::floor_date(as.Date(closed), "month")) %>%
dplyr::group_by(closedmonth) %>%
dplyr::summarize(nbr_issues_closed = length(labels),
nbr_rejections = sum(grepl("rejected", labels))) %>%
dplyr::mutate(itype = "pre-review")
all_rejected <- dplyr::bind_rows(review_rejected, prereview_rejected)

## For PRE-REVIEW issues, add information about the corresponding REVIEW
## issue number
isspre <- iss %>% dplyr::filter(grepl("\\[PRE REVIEW\\]", title)) %>%
dplyr::filter(!grepl("withdrawn", labels)) %>%
dplyr::filter(!grepl("rejected", labels))
## Some titles have multiple pre-review issues. In these cases, keep the latest
isspre <- isspre %>% dplyr::arrange(desc(number)) %>%
dplyr::filter(!duplicated(title)) %>%
dplyr::mutate(title = gsub("\\[PRE REVIEW\\]: ", "", title)) %>%
dplyr::rename_all(~ paste0("prerev_", .))
papers <- papers %>% dplyr::left_join(issrev, by = "alternative.id") %>%
dplyr::left_join(isspre, by = c("prereview_issue_id" = "prerev_number")) %>%
dplyr::mutate(prerev_opened = as.Date(prerev_opened),
prerev_closed = as.Date(prerev_closed),
review_opened = as.Date(review_opened),
review_closed = as.Date(review_closed)) %>%
dplyr::mutate(days_in_pre = prerev_closed - prerev_opened,
days_in_rev = review_closed - review_opened,
to_review = !is.na(review_opened))
source_track <- c(source_track,
structure(rep("joss-github", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))

## Reorder so that software repositories that were interrogated longest
## ago are checked first
tmporder <- order(match(papers$alternative.id, papers_archive$alternative.id),
na.last = FALSE)
software_urls <- papers$repo_url[tmporder]
is_github <- grepl("github", software_urls)
length(is_github)
## [1] 1970

sum(is_github)
## [1] 1863

software_urls[!is_github]
## [1] "https://framagit.org/GustaveCoste/eldam"
## [2] "https://framagit.org/GustaveCoste/eldam"
## [3] "https://framagit.org/GustaveCoste/eldam"
## [4] "https://framagit.org/GustaveCoste/eldam"
## [5] "https://gitlab.mpikg.mpg.de/curcuraci/bmiptools"
## [6] "https://gitlab.pasteur.fr/vlegrand/ROCK"
## [7] "https://gitlab.inria.fr/bramas/tbfmm"
## [8] "https://gitlab.com/pyFBS/pyFBS"
## [9] "https://gitlab.com/mmartin-lagarde/exonoodle-exoplanets/-/tree/master/"
## [10] "https://gitlab.com/myqueue/myqueue"
## [11] "https://gitlab.com/utopia-project/utopia"
## [12] "https://gitlab.com/fduchate/predihood"
## [13] "https://gitlab.com/moerman1/fhi-cc4s"
## [14] "https://jugit.fz-juelich.de/compflu/swalbe.jl/"
## [15] "https://gitlab.com/wpettersson/kep_solver"
## [16] "https://gitlab.dune-project.org/dorie/dorie"
## [17] "https://gitlab.kuleuven.be/ITSCreaLab/public-toolboxes/dyntapy"
## [18] "https://gitlab.com/dmt-development/dmt-core"
## [19] "https://bitbucket.org/orionmhdteam/orion2_release1/src/master/"
## [20] "https://bitbucket.org/meg/cbcbeat"
## [21] "https://bitbucket.org/cardosan/brightway2-temporalis"
## [22] "https://gitlab.com/jason-rumengan/pyarma"
## [23] "https://gitlab.com/petsc/petsc"
## [24] "https://savannah.nongnu.org/projects/complot/"
## [25] "https://bitbucket.org/berkeleylab/hardware-control/src/main/"
## [26] "https://gitlab.com/libreumg/dataquier.git"
## [27] "https://gitlab.inria.fr/miet/miet"
## [28] "https://gitlab.com/utopia-project/dantro"
## [29] "https://bitbucket.org/manuela_s/hcp/"
## [30] "https://gitlab.com/manchester_qbi/manchester_qbi_public/madym_cxx/"
## [31] "https://bitbucket.org/hammurabicode/hamx"
## [32] "http://mutabit.com/repos.fossil/grafoscopio/"
## [33] "https://gitlab.com/gdetor/genetic_alg"
## [34] "https://gitlab.com/ffaucher/hawen"
## [35] "https://gitlab.com/ProjectRHEA/flowsolverrhea"
## [36] "https://gitlab.com/culturalcartography/text2map"
## [37] "https://gitlab.inria.fr/bcoye/game-engine-scheduling-simulation"
## [38] "https://gitlab.com/cerfacs/batman"
## [39] "https://gite.lirmm.fr/doccy/RedOak"
## [40] "https://gitlab.com/emd-dev/emd"
## [41] "https://gitlab.com/fibreglass/pivc"
## [42] "https://gricad-gitlab.univ-grenoble-alpes.fr/ttk/spam/"
## [43] "https://gitlab.ethz.ch/holukas/dyco-dynamic-lag-compensation"
## [44] "https://bitbucket.org/clhaley/Multitaper.jl"
## [45] "https://git.rwth-aachen.de/ants/sensorlab/imea"
## [46] "https://gitlab.com/InspectorCell/inspectorcell"
## [47] "https://earth.bsc.es/gitlab/wuruchi/autosubmitreact"
## [48] "https://framagit.org/GustaveCoste/off-product-environmental-impact/"
## [49] "https://gitlab.com/remram44/taguette"
## [50] "https://gitlab.com/marinvaders/marinvaders"
## [51] "https://gitlab.com/dlr-dw/ontocode"
## [52] "https://gitlab.com/vibes-developers/vibes"
## [53] "https://bitbucket.org/rram/dvrlib/src/joss/"
## [54] "https://gitlab.com/sails-dev/sails"
## [55] "https://gitlab.uliege.be/smart_grids/public/gboml"
## [56] "https://bitbucket.org/bmskinner/nuclear_morphology"
## [57] "https://gitlab.com/project-dare/dare-platform"
## [58] "https://gitlab.com/picos-api/picos"
## [59] "https://gitlab.com/sissopp_developers/sissopp"
## [60] "https://gitlab.gwdg.de/mpievolbio-it/crbhits"
## [61] "https://bitbucket.org/mpi4py/mpi4py-fft"
## [62] "https://www.idpoisson.fr/fullswof/"
## [63] "https://bitbucket.org/cdegroot/wediff"
## [64] "https://gitlab.com/eidheim/Simple-Web-Server"
## [65] "https://gitlab.com/toposens/public/ros-packages"
## [66] "https://bitbucket.org/glotzer/rowan"
## [67] "https://gitlab.com/QComms/cqptoolkit"
## [68] "https://gitlab.com/dsbowen/conditional-inference"
## [69] "https://gitlab.com/thartwig/asloth"
## [70] "https://code.usgs.gov/umesc/quant-ecology/fishstan/"
## [71] "https://git.geomar.de/digital-earth/dasf/dasf-messaging-python"
## [72] "https://bitbucket.org/basicsums/basicsums"
## [73] "https://gitlab.inria.fr/azais/treex"
## [74] "https://gitlab.com/moorepants/skijumpdesign"
## [75] "https://git.iws.uni-stuttgart.de/tools/frackit"
## [76] "https://gitlab.com/cosmograil/PyCS3"
## [77] "https://bitbucket.org/miketuri/perl-spice-sim-seus/"
## [78] "https://bitbucket.org/likask/mofem-cephas"
## [79] "https://bitbucket.org/ocellarisproject/ocellaris"
## [80] "https://gitlab.inria.fr/mosaic/bvpy"
## [81] "https://bitbucket.org/berkeleylab/esdr-pygdh/"
## [82] "https://bitbucket.org/cmutel/brightway2"
## [83] "https://gitlab.com/dlr-ve/autumn/"
## [84] "https://gitlab.com/davidtourigny/dynamic-fba"
## [85] "https://sourceforge.net/p/mcapl/mcapl_code/ci/master/tree/"
## [86] "https://gitlab.com/ags-data-format-wg/ags-python-library"
## [87] "https://bitbucket.org/dolfin-adjoint/pyadjoint"
## [88] "https://gitlab.com/LMSAL_HUB/aia_hub/aiapy"
## [89] "https://gitlab.com/materials-modeling/wulffpack"
## [90] "https://gitlab.com/costrouc/pysrim"
## [91] "https://git.mpib-berlin.mpg.de/castellum/castellum"
## [92] "https://gitlab.com/permafrostnet/teaspoon"
## [93] "https://gitlab.com/tesch1/cppduals"
## [94] "https://doi.org/10.17605/OSF.IO/3DS6A"
## [95] "https://gitlab.com/geekysquirrel/bigx"
## [96] "https://gitlab.com/datafold-dev/datafold/"
## [97] "https://bitbucket.org/cloopsy/android/"
## [98] "https://bitbucket.org/dghoshal/frieda"
## [99] "https://gitlab.com/gims-developers/gims"
## [100] "https://gitlab.com/programgreg/tagginglatencyestimator"
## [101] "https://gitlab.com/energyincities/besos/"
## [102] "https://gitlab.com/celliern/scikit-fdiff/"
## [103] "https://gitlab.com/ampere2/metalwalls"
## [104] "https://gitlab.ruhr-uni-bochum.de/reichp2y/proppy"
## [105] "https://gitlab.com/dglaeser/fieldcompare"
## [106] "https://c4science.ch/source/tamaas/"
## [107] "https://bitbucket.org/mituq/muq2.git"
df <- do.call(dplyr::bind_rows, lapply(software_urls[is_github], function(u) {
u0 <- gsub("^http://", "https://", gsub("\\.git$", "", gsub("/$", "", u)))
if (grepl("/tree/", u0)) {
u0 <- strsplit(u0, "/tree/")[[1]][1]
}
if (grepl("/blob/", u0)) {
u0 <- strsplit(u0, "/blob/")[[1]][1]
}
info <- try({
gh(gsub("(https://)?(www.)?github.com/", "/repos/", u0))
})
languages <- try({
gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/languages"),
.limit = 500)
})
topics <- try({
gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/topics"),
.accept = "application/vnd.github.mercy-preview+json", .limit = 500)
})
contribs <- try({
gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/contributors"),
.limit = 500)
})
if (!is(info, "try-error") && length(info) > 1) {
if (!is(contribs, "try-error")) {
if (length(contribs) == 0) {
repo_nbr_contribs <- repo_nbr_contribs_2ormore <- NA_integer_
} else {
repo_nbr_contribs <- length(contribs)
repo_nbr_contribs_2ormore <- sum(vapply(contribs, function(x) x$contributions >= 2, NA_integer_))
if (is.na(repo_nbr_contribs_2ormore)) {
print(contribs)
}
}
} else {
repo_nbr_contribs <- repo_nbr_contribs_2ormore <- NA_integer_
}
if (!is(languages, "try-error")) {
if (length(languages) == 0) {
repolang <- ""
} else {
repolang <- paste(paste(names(unlist(languages)),
unlist(languages), sep = ":"), collapse = ",")
}
} else {
repolang <- ""
}
if (!is(topics, "try-error")) {
if (length(topics$names) == 0) {
repotopics <- ""
} else {
repotopics <- paste(unlist(topics$names), collapse = ",")
}
} else {
repotopics <- ""
}
data.frame(repo_url = u,
repo_created = info$created_at,
repo_updated = info$updated_at,
repo_pushed = info$pushed_at,
repo_nbr_stars = info$stargazers_count,
repo_language = ifelse(!is.null(info$language),
info$language, NA_character_),
repo_languages_bytes = repolang,
repo_topics = repotopics,
repo_license = ifelse(!is.null(info$license),
info$license$key, NA_character_),
repo_nbr_contribs = repo_nbr_contribs,
repo_nbr_contribs_2ormore = repo_nbr_contribs_2ormore
)
} else {
NULL
}
})) %>%
dplyr::mutate(repo_created = as.Date(repo_created),
repo_updated = as.Date(repo_updated),
repo_pushed = as.Date(repo_pushed)) %>%
dplyr::distinct() %>%
dplyr::mutate(repo_info_obtained = lubridate::today())
stopifnot(length(unique(df$repo_url)) == length(df$repo_url))
dim(df)
## For papers not in df (i.e., for which we didn't get a valid response
## from the GitHub API query), use information from the archived data frame
dfarchive <- papers_archive %>%
dplyr::select(colnames(df)[colnames(df) %in% colnames(papers_archive)]) %>%
dplyr::filter(!(repo_url %in% df$repo_url))
df <- dplyr::bind_rows(df, dfarchive)
papers <- papers %>% dplyr::left_join(df, by = "repo_url")
source_track <- c(source_track,
structure(rep("sw-github", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))

## Convert publication date to Date format
## Add information about the half year (H1, H2) of publication
## Count number of authors
papers <- papers %>% dplyr::select(-reference, -license, -link) %>%
dplyr::mutate(published.date = as.Date(published.print)) %>%
dplyr::mutate(
halfyear = paste0(year(published.date),
ifelse(month(published.date) <= 6, "H1", "H2"))
) %>% dplyr::mutate(
halfyear = factor(halfyear,
levels = paste0(rep(sort(unique(year(published.date))),
each = 2), c("H1", "H2")))
) %>% dplyr::mutate(nbr_authors = vapply(author, function(a) nrow(a), NA_integer_))
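## Quick illustration (years chosen arbitrarily): the factor levels above
## interleave H1 and H2 within each year via vector recycling
paste0(rep(2016:2017, each = 2), c("H1", "H2"))
## [1] "2016H1" "2016H2" "2017H1" "2017H2"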
papers <- papers %>% dplyr::distinct()
source_track <- c(source_track,
structure(rep("cleanup", length(setdiff(colnames(papers),
names(source_track)))),
names = setdiff(colnames(papers), names(source_track))))

In some cases, fetching information from an API (e.g., the GitHub API) fails for a subset of the publications. There are also other reasons for missing values (for example, the earliest submissions do not have an associated pre-review issue). The table below lists the number of missing values for each variable in the data frame.
DT::datatable(
data.frame(variable = colnames(papers),
nbr_missing = colSums(is.na(papers))) %>%
dplyr::mutate(source = source_track[variable]),
escape = FALSE, rownames = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE)
)

ggplot(papers %>%
dplyr::mutate(pubmonth = lubridate::floor_date(published.date, "month")) %>%
dplyr::group_by(pubmonth) %>%
dplyr::summarize(npub = n()),
aes(x = factor(pubmonth), y = npub)) +
geom_bar(stat = "identity") + theme_minimal() +
labs(x = "", y = "Number of published papers per month", caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

ggplot(papers %>%
dplyr::mutate(pubyear = lubridate::year(published.date)) %>%
dplyr::group_by(pubyear) %>%
dplyr::summarize(npub = n()),
aes(x = factor(pubyear), y = npub)) +
geom_bar(stat = "identity") + theme_minimal() +
labs(x = "", y = "Number of published papers per year", caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

The plots below illustrate the fraction of pre-review and review issues closed during each month that have the ‘rejected’ label attached.
ggplot(all_rejected,
aes(x = factor(closedmonth), y = nbr_rejections/nbr_issues_closed)) +
geom_bar(stat = "identity") +
theme_minimal() +
facet_wrap(~ itype, ncol = 1) +
labs(x = "Month of issue closing", y = "Fraction of issues rejected",
caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

Papers with 20 or more citations are grouped in the “>=20” category.
ggplot(papers %>%
dplyr::mutate(citation_count = replace(citation_count,
citation_count >= 20, ">=20")) %>%
dplyr::mutate(citation_count = factor(citation_count,
levels = c(0:20, ">=20"))) %>%
dplyr::group_by(citation_count) %>%
dplyr::tally(),
aes(x = citation_count, y = n)) +
geom_bar(stat = "identity") +
theme_minimal() +
labs(x = "Crossref citation count", y = "Number of publications", caption = dcap)The table below sorts the JOSS papers in decreasing order by the number of citations in Crossref.
DT::datatable(
papers %>%
dplyr::mutate(url = paste0("<a href='", url, "' target='_blank'>",
url,"</a>")) %>%
dplyr::arrange(desc(citation_count)) %>%
dplyr::select(title, url, published.date, citation_count),
escape = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE)
)

plotly::ggplotly(
ggplot(papers, aes(x = published.date, y = citation_count, label = title)) +
geom_point(alpha = 0.5) + theme_bw() + scale_y_sqrt() +
geom_smooth() +
labs(x = "Date of publication", y = "Crossref citation count", caption = dcap) +
theme(axis.title = element_text(size = 15)),
tooltip = c("label", "x", "y")
)

## Warning: The following aesthetics were dropped during statistical transformation: label
## ℹ This can happen when ggplot fails to infer the correct grouping structure in
## the data.
## ℹ Did you forget to specify a `group` aesthetic or to convert a numerical
## variable into a factor?
Here, we plot the citation count for all papers published within each half year, sorted in decreasing order.
ggplot(papers %>% dplyr::group_by(halfyear) %>%
dplyr::arrange(desc(citation_count)) %>%
dplyr::mutate(idx = seq_along(citation_count)),
aes(x = idx, y = citation_count)) +
geom_point(alpha = 0.5) +
facet_wrap(~ halfyear, scales = "free") +
theme_bw() +
labs(x = "Index", y = "Crossref citation count", caption = dcap)In these plots we investigate whether the time a submission spends in the pre-review or review stage (or their sum) has changed over time. The blue curve corresponds to a rolling median for submissions over 120 days.
## Helper functions (modified from https://stackoverflow.com/questions/65147186/geom-smooth-with-median-instead-of-mean)
rolling_median <- function(formula, data, xwindow = 120, ...) {
## Get order of x-values and sort x/y
ordr <- order(data$x)
x <- data$x[ordr]
y <- data$y[ordr]
## Initialize vector for smoothed y-values
ys <- rep(NA, length(x))
## Calculate median y-value for each unique x-value
for (xs in setdiff(unique(x), NA)) {
## Get x-values in the window, and calculate median of corresponding y
j <- ((xs - xwindow/2) < x) & (x < (xs + xwindow/2))
ys[x == xs] <- median(y[j], na.rm = TRUE)
}
y <- ys
structure(list(x = x, y = y, f = approxfun(x, y)), class = "rollmed")
}
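## geom_smooth() calls predict() on the object returned by the smoothing
## method; the "rollmed" class directs S3 dispatch to the method below, which
## evaluates the stored interpolation function at the requested x-values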
predict.rollmed <- function(mod, newdata, ...) {
setNames(mod$f(newdata$x), newdata$x)
}

ggplot(papers, aes(x = prerev_opened, y = as.numeric(days_in_pre))) +
geom_point() +
geom_smooth(formula = y ~ x, method = "rolling_median",
se = FALSE, method.args = list(xwindow = 120)) +
theme_bw() +
labs(x = "Date of pre-review opening", y = "Number of days in pre-review",
caption = dcap) +
theme(axis.title = element_text(size = 15))

ggplot(papers, aes(x = review_opened, y = as.numeric(days_in_rev))) +
geom_point() +
geom_smooth(formula = y ~ x, method = "rolling_median",
se = FALSE, method.args = list(xwindow = 120)) +
theme_bw() +
labs(x = "Date of review opening", y = "Number of days in review",
caption = dcap) +
theme(axis.title = element_text(size = 15))

ggplot(papers, aes(x = prerev_opened,
y = as.numeric(days_in_pre) + as.numeric(days_in_rev))) +
geom_point() +
geom_smooth(formula = y ~ x, method = "rolling_median",
se = FALSE, method.args = list(xwindow = 120)) +
theme_bw() +
labs(x = "Date of pre-review opening", y = "Number of days in pre-review + review",
caption = dcap) +
theme(axis.title = element_text(size = 15))

Next, we consider the languages used by the submissions, both as reported by Whedon and based on the information encoded in available GitHub repositories (for the latter, we also record the number of bytes of code written in each language). Note that a given submission can use multiple languages.
## Language information from Whedon
sspl <- strsplit(papers$languages, ",")
all_languages <- unique(unlist(sspl))
langs <- do.call(dplyr::bind_rows, lapply(all_languages, function(l) {
data.frame(language = l,
nbr_submissions_Whedon = sum(vapply(sspl, function(v) l %in% v, 0)))
}))
## Language information from GitHub software repos
a <- lapply(strsplit(papers$repo_languages_bytes, ","), function(w) strsplit(w, ":"))
a <- a[sapply(a, length) > 0]
langbytes <- as.data.frame(t(as.data.frame(a))) %>%
setNames(c("language", "bytes")) %>%
dplyr::mutate(bytes = as.numeric(bytes)) %>%
dplyr::filter(!is.na(language)) %>%
dplyr::group_by(language) %>%
dplyr::summarize(nbr_bytes_GitHub = sum(bytes),
nbr_repos_GitHub = length(bytes)) %>%
dplyr::arrange(desc(nbr_bytes_GitHub))
langs <- dplyr::full_join(langs, langbytes, by = "language")

ggplot(langs %>% dplyr::arrange(desc(nbr_submissions_Whedon)) %>%
dplyr::filter(nbr_submissions_Whedon > 10) %>%
dplyr::mutate(language = factor(language, levels = language)),
aes(x = language, y = nbr_submissions_Whedon)) +
geom_bar(stat = "identity") +
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
labs(x = "", y = "Number of submissions", caption = dcap) +
theme(axis.title = element_text(size = 15))

DT::datatable(
langs %>% dplyr::arrange(desc(nbr_bytes_GitHub)),
escape = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE)
)

ggplot(langs, aes(x = nbr_repos_GitHub, y = nbr_bytes_GitHub)) +
geom_point() + scale_x_log10() + scale_y_log10() + geom_smooth() +
theme_bw() +
labs(x = "Number of repos using the language",
y = "Total number of bytes of code\nwritten in the language",
caption = dcap) +
theme(axis.title = element_text(size = 15))

ggplotly(
ggplot(papers, aes(x = citation_count, y = repo_nbr_stars,
label = title)) +
geom_point(alpha = 0.5) + scale_x_sqrt() + scale_y_sqrt() +
theme_bw() +
labs(x = "Crossref citation count", y = "Number of stars, GitHub repo",
caption = dcap) +
theme(axis.title = element_text(size = 15)),
tooltip = c("label", "x", "y")
)

ggplot(papers, aes(x = as.numeric(prerev_opened - repo_created))) +
geom_histogram(bins = 50) +
theme_bw() +
labs(x = "Time (days) from repo creation to JOSS pre-review start",
caption = dcap) +
theme(axis.title = element_text(size = 15))

ggplot(papers, aes(x = as.numeric(repo_pushed - review_closed))) +
geom_histogram(bins = 50) +
theme_bw() +
labs(x = "Time (days) from closure of JOSS review to most recent commit in repo",
caption = dcap) +
theme(axis.title = element_text(size = 15)) +
facet_wrap(~ year(published.date), scales = "free_y")Submissions associated with rOpenSci and pyOpenSci are not considered here, since they are not explicitly reviewed at JOSS.
ggplot(papers %>%
dplyr::filter(!grepl("rOpenSci|pyOpenSci", prerev_labels)) %>%
dplyr::mutate(year = year(published.date)),
aes(x = nbr_reviewers)) + geom_bar() +
facet_wrap(~ year) + theme_bw() +
labs(x = "Number of reviewers", y = "Number of submissions", caption = dcap)Submissions associated with rOpenSci and pyOpenSci are not considered here, since they are not explicitly reviewed at JOSS.
reviewers <- papers %>%
dplyr::filter(!grepl("rOpenSci|pyOpenSci", prerev_labels)) %>%
dplyr::mutate(year = year(published.date)) %>%
dplyr::select(reviewers, year) %>%
tidyr::separate_rows(reviewers, sep = ",")
## Most active reviewers
DT::datatable(
reviewers %>% dplyr::group_by(reviewers) %>%
dplyr::summarize(nbr_reviews = length(year),
timespan = paste(unique(c(min(year), max(year))),
collapse = " - ")) %>%
dplyr::arrange(desc(nbr_reviews)),
escape = FALSE, rownames = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE)
)

ggplot(papers %>%
dplyr::mutate(year = year(published.date),
`r/pyOpenSci` = factor(
grepl("rOpenSci|pyOpenSci", prerev_labels),
levels = c("TRUE", "FALSE"))),
aes(x = editor)) + geom_bar(aes(fill = `r/pyOpenSci`)) +
theme_bw() + facet_wrap(~ year, ncol = 1) +
scale_fill_manual(values = c(`TRUE` = "grey65", `FALSE` = "grey35")) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
labs(x = "Editor", y = "Number of submissions", caption = dcap)all_licenses <- sort(unique(papers$repo_license))
license_levels = c(grep("apache", all_licenses, value = TRUE),
grep("bsd", all_licenses, value = TRUE),
grep("mit", all_licenses, value = TRUE),
grep("gpl", all_licenses, value = TRUE),
grep("mpl", all_licenses, value = TRUE))
license_levels <- c(license_levels, setdiff(all_licenses, license_levels))
ggplot(papers %>%
dplyr::mutate(repo_license = factor(repo_license,
levels = license_levels)),
aes(x = repo_license)) +
geom_bar() +
theme_bw() +
labs(x = "Software license", y = "Number of submissions", caption = dcap) +
theme(axis.title = element_text(size = 15),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
facet_wrap(~ year(published.date), scales = "free_y")## For plots below, replace licenses present in less
## than 2.5% of the submissions by 'other'
tbl <- table(papers$repo_license)
to_replace <- names(tbl[tbl <= 0.025 * nrow(papers)])

ggplot(papers %>%
dplyr::mutate(year = year(published.date)) %>%
dplyr::mutate(repo_license = replace(repo_license,
repo_license %in% to_replace,
"other")) %>%
dplyr::mutate(year = factor(year),
repo_license = factor(
repo_license,
levels = license_levels[license_levels %in% repo_license]
)) %>%
dplyr::group_by(year, repo_license, .drop = FALSE) %>%
dplyr::count() %>%
dplyr::mutate(year = as.integer(as.character(year))),
aes(x = year, y = n, fill = repo_license)) + geom_area() +
theme_minimal() +
scale_fill_brewer(palette = "Set1", name = "Software\nlicense",
na.value = "grey") +
theme(axis.title = element_text(size = 15)) +
labs(x = "Year", y = "Number of submissions", caption = dcap)ggplot(papers %>%
dplyr::mutate(year = year(published.date)) %>%
dplyr::mutate(repo_license = replace(repo_license,
repo_license %in% to_replace,
"other")) %>%
dplyr::mutate(year = factor(year),
repo_license = factor(
repo_license,
levels = license_levels[license_levels %in% repo_license]
)) %>%
dplyr::group_by(year, repo_license, .drop = FALSE) %>%
dplyr::summarize(n = n()) %>%
dplyr::mutate(freq = n/sum(n)) %>%
dplyr::mutate(year = as.integer(as.character(year))),
aes(x = year, y = freq, fill = repo_license)) + geom_area() +
theme_minimal() +
scale_fill_brewer(palette = "Set1", name = "Software\nlicense",
na.value = "grey") +
theme(axis.title = element_text(size = 15)) +
labs(x = "Year", y = "Fraction of submissions", caption = dcap)a <- unlist(strsplit(papers$repo_topics, ","))
a <- a[!is.na(a)]
topicfreq <- table(a)
colors <- viridis::viridis(100)
set.seed(1234)
wordcloud::wordcloud(
names(topicfreq), sqrt(topicfreq), min.freq = 1, max.words = 300,
random.order = FALSE, rot.per = 0.05, use.r.layout = FALSE,
colors = colors, scale = c(10, 0.1), random.color = TRUE,
ordered.colors = FALSE, vfont = c("serif", "plain")
)

DT::datatable(as.data.frame(topicfreq) %>%
dplyr::rename(topic = a, nbr_repos = Freq) %>%
dplyr::arrange(desc(nbr_repos)),
escape = FALSE, rownames = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE))

Here, we take a more detailed look at the papers that cite JOSS papers, using data from the Open Citations Corpus.
citations <- tryCatch({
citecorp::oc_coci_cites(doi = papers$alternative.id) %>%
dplyr::distinct() %>%
dplyr::mutate(citation_info_obtained = as.character(lubridate::today()))
}, error = function(e) {
NULL
})
dim(citations)
## [1] 29658 8
if (!is.null(citations)) {
citations <- citations %>%
dplyr::filter(!(oci %in% citations_archive$oci))
tmpj <- rcrossref::cr_works(dois = unique(citations$citing))$data %>%
dplyr::select(contains("doi"), contains("container.title"), contains("issn"),
contains("type"), contains("publisher"), contains("prefix"))
citations <- citations %>% dplyr::left_join(tmpj, by = c("citing" = "doi"))
## bioRxiv preprints don't have a 'container.title' or 'issn', but we assume
## that they can be identified from the prefix 10.1101 - set the container.title
## for these records manually. We may or may not want to count these
## (counting both the preprint and the later publication could count a citation twice)
citations$container.title[citations$prefix == "10.1101"] <- "bioRxiv"
## JOSS is represented by 'The Journal of Open Source Software' as well as
## 'Journal of Open Source Software'
citations$container.title[citations$container.title ==
"Journal of Open Source Software"] <-
"The Journal of Open Source Software"
## Remove real self citations (cited DOI = citing DOI)
citations <- citations %>% dplyr::filter(cited != citing)
## Merge with the archive
citations <- dplyr::bind_rows(citations, citations_archive)
} else {
citations <- citations_archive
if (is.null(citations[["citation_info_obtained"]])) {
citations$citation_info_obtained <- NA_character_
}
}
citations$citation_info_obtained[is.na(citations$citation_info_obtained)] <-
"2021-08-11"
write.table(citations, file = "joss_submission_citations.tsv",
row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)## Latest successful update of new citation data
max(as.Date(citations$citation_info_obtained))## [1] "2023-02-01"
## Number of JOSS papers with >0 citations included in this collection
length(unique(citations$cited))## [1] 1221
## Number of JOSS papers with >0 citations according to Crossref
length(which(papers$citation_count > 0))## [1] 1283
## Number of citations from Open Citations Corpus vs Crossref
df0 <- papers %>% dplyr::select(doi, citation_count) %>%
dplyr::full_join(citations %>% dplyr::group_by(cited) %>%
dplyr::tally() %>%
dplyr::mutate(n = replace(n, is.na(n), 0)),
by = c("doi" = "cited"))## Total citation count Crossref
sum(df0$citation_count, na.rm = TRUE)## [1] 32297
## Total citation count Open Citations Corpus
sum(df0$n, na.rm = TRUE)## [1] 28454
## Ratio of total citation count Open Citations Corpus/Crossref
sum(df0$n, na.rm = TRUE)/sum(df0$citation_count, na.rm = TRUE)## [1] 0.8810106
ggplot(df0, aes(x = citation_count, y = n)) +
geom_abline(slope = 1, intercept = 0) +
geom_point(size = 3, alpha = 0.5) +
labs(x = "Crossref citation count", y = "Open Citations Corpus citation count",
caption = dcap) +
theme_bw()

## Zoom in
ggplot(df0, aes(x = citation_count, y = n)) +
geom_abline(slope = 1, intercept = 0) +
geom_point(size = 3, alpha = 0.5) +
labs(x = "Crossref citation count", y = "Open Citations Corpus citation count",
caption = dcap) +
theme_bw() +
coord_cartesian(xlim = c(0, 75), ylim = c(0, 75))

## Number of journals citing JOSS papers
length(unique(citations$container.title))
## [1] 6142
length(unique(citations$issn))
## [1] 4749
topcit <- citations %>% dplyr::group_by(container.title) %>%
dplyr::summarize(nbr_citations_of_joss_papers = length(cited),
nbr_cited_joss_papers = length(unique(cited)),
nbr_citing_papers = length(unique(citing)),
nbr_selfcitations_of_joss_papers = sum(author_sc == "yes"),
fraction_selfcitations = signif(nbr_selfcitations_of_joss_papers /
nbr_citations_of_joss_papers, digits = 3)) %>%
dplyr::arrange(desc(nbr_cited_joss_papers))
DT::datatable(topcit,
escape = FALSE, rownames = FALSE,
filter = list(position = 'top', clear = FALSE),
options = list(scrollX = TRUE))

plotly::ggplotly(
ggplot(topcit, aes(x = nbr_citations_of_joss_papers, y = nbr_cited_joss_papers,
label = container.title)) +
geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "grey") +
geom_point(size = 3, alpha = 0.5) +
theme_bw() +
labs(caption = dcap, x = "Number of citations of JOSS papers",
y = "Number of cited JOSS papers")
)

plotly::ggplotly(
ggplot(topcit, aes(x = nbr_citations_of_joss_papers, y = nbr_cited_joss_papers,
label = container.title)) +
geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "grey") +
geom_point(size = 3, alpha = 0.5) +
theme_bw() +
coord_cartesian(xlim = c(0, 100), ylim = c(0, 50)) +
labs(caption = dcap, x = "Number of citations of JOSS papers",
y = "Number of cited JOSS papers")
)

write.table(topcit, file = "joss_submission_citations_byjournal.tsv",
            row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)

The tibble object with all data collected above is serialized to a file that can be downloaded and reused.
head(papers) %>% as.data.frame()

## alternative.id container.title created deposited
## 1 10.21105/joss.00295 The Journal of Open Source Software 2017-07-26 2019-10-01
## 2 10.21105/joss.03917 Journal of Open Source Software 2021-12-02 2021-12-02
## 3 10.21105/joss.01249 Journal of Open Source Software 2019-03-14 2019-11-17
## 4 10.21105/joss.01666 Journal of Open Source Software 2019-11-01 2019-11-17
## 5 10.21105/joss.04321 Journal of Open Source Software 2022-12-08 2022-12-08
## 6 10.21105/joss.04591 Journal of Open Source Software 2022-08-09 2022-08-09
## published.print doi indexed issn issue issued
## 1 2017-07-26 10.21105/joss.00295 2022-03-30 2475-9066 15 2017-07-26
## 2 2021-12-02 10.21105/joss.03917 2022-03-30 2475-9066 68 2021-12-02
## 3 2019-03-14 10.21105/joss.01249 2022-04-04 2475-9066 35 2019-03-14
## 4 2019-11-01 10.21105/joss.01666 2022-04-04 2475-9066 43 2019-11-01
## 5 2022-12-08 10.21105/joss.04321 2022-12-09 2475-9066 80 2022-12-08
## 6 2022-08-09 10.21105/joss.04591 2022-08-09 2475-9066 76 2022-08-09
## member page prefix publisher score source reference.count
## 1 8722 295 10.21105 The Open Journal 0 Crossref 5
## 2 8722 3917 10.21105 The Open Journal 0 Crossref 34
## 3 8722 1249 10.21105 The Open Journal 0 Crossref 1
## 4 8722 1666 10.21105 The Open Journal 0 Crossref 5
## 5 8722 4321 10.21105 The Open Journal 0 Crossref 15
## 6 8722 4591 10.21105 The Open Journal 0 Crossref 25
## references.count is.referenced.by.count
## 1 5 0
## 2 34 0
## 3 1 3
## 4 5 0
## 5 15 0
## 6 25 0
## title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 3 dfitspy: a dfits|fitsort implementation in python
## 4 UncertainData.jl: a Julia package for working with measurements and datasets with uncertainties.
## 5 PyNM: a Lightweight Python implementation of Normative\nModeling
## 6 Mallob: Scalable SAT Solving On Demand With\nDecentralized Job Scheduling
## type url volume
## 1 journal-article http://dx.doi.org/10.21105/joss.00295 2
## 2 journal-article http://dx.doi.org/10.21105/joss.03917 6
## 3 journal-article http://dx.doi.org/10.21105/joss.01249 4
## 4 journal-article http://dx.doi.org/10.21105/joss.01666 4
## 5 journal-article http://dx.doi.org/10.21105/joss.04321 7
## 6 journal-article http://dx.doi.org/10.21105/joss.04591 7
## short.container.title
## 1 JOSS
## 2 JOSS
## 3 JOSS
## 4 JOSS
## 5 JOSS
## 6 JOSS
## author
## 1 http://orcid.org/0000-0002-7127-2789, http://orcid.org/0000-0003-2680-3066, http://orcid.org/0000-0002-3769-0127, FALSE, FALSE, FALSE, Nima, Weixin, Alan, S. Hejazi, Cai, E. Hubbard, first, additional, additional
## 2 http://orcid.org/0000-0003-2217-4768, FALSE, Shailesh, Kumar, first
## 3 http://orcid.org/0000-0001-8385-3276, FALSE, Romain, Thomas, first
## 4 http://orcid.org/0000-0001-6880-8725, FALSE, Kristian, Haaga, first
## 5 http://orcid.org/0000-0002-9940-8799, http://orcid.org/0000-0002-2253-1844, FALSE, FALSE, Annabelle, Guillaume, Harvey, Dumas, first, additional
## 6 http://orcid.org/0000-0003-3330-9349, http://orcid.org/0000-0002-4185-1851, FALSE, FALSE, Peter, Dominik, Sanders, Schreiber, first, additional
## subject
## 1 <NA>
## 2 <NA>
## 3 <NA>
## 4 <NA>
## 5 Pulmonary and Respiratory Medicine,Pediatrics, Perinatology and Child Health
## 6 Pulmonary and Respiratory Medicine,Pediatrics, Perinatology and Child Health
## citation_count
## 1 0
## 2 0
## 3 3
## 4 0
## 5 0
## 6 0
## api_title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 3 dfitspy: a dfits|fitsort implementation in python
## 4 UncertainData.jl: a Julia package for working with measurements and datasets with uncertainties.
## 5 PyNM: a Lightweight Python implementation of Normative Modeling
## 6 Mallob: Scalable SAT Solving On Demand With Decentralized Job Scheduling
## api_state editor reviewers nbr_reviewers
## 1 accepted @karthik @NelleV 1
## 2 accepted @pdebuyl @Saran-nns,@mirca 2
## 3 accepted @arfon @taldcroft 1
## 4 accepted @oliviaguest @dmbates,@ahwillia 2
## 5 accepted @dfm @smkia,@saigerutherford 2
## 6 accepted @danielskatz @ARMartinelli,@massimotorquati 2
## repo_url review_issue_id
## 1 https://github.com/nhejazi/biotmle 295
## 2 https://github.com/carnotresearch/cr-sparse 3917
## 3 https://github.com/astrom-tom/dfitspy 1249
## 4 https://github.com/kahaaga/UncertainData.jl 1666
## 5 https://github.com/ppsp-team/PyNM 4321
## 6 https://github.com/domschrei/mallob 4591
## prereview_issue_id languages
## 1 156 Makefile,R,TeX
## 2 3913 Python,Shell,TeX
## 3 1217 Python,Makefile,HTML,TeX
## 4 1652 TeX,Julia
## 5 4247 TeX,Python,Jupyter Notebook
## 6 4535 CMake,Dockerfile,Shell,TeX,Python
## archive_doi
## 1 http://dx.doi.org/10.5281/zenodo.834849
## 2 https://doi.org/10.5281/zenodo.5749792
## 3 https://doi.org/10.5281/zenodo.2592698
## 4 https://doi.org/10.5281/zenodo.3522252
## 5 10.5281/zenodo.7396721
## 6 v1.1.0
## review_title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 3 dfitspy
## 4 UncertainData.jl: a Julia package for working with measurements and datasets with uncertainties
## 5 PyNM: a Lightweight Python implementation of Normative Modeling
## 6 Mallob: Scalable SAT Solving On Demand With Decentralized Job Scheduling
## review_number review_state review_opened review_closed review_ncomments
## 1 295 closed 2017-06-14 2017-07-26 22
## 2 3917 closed 2021-11-16 2021-12-02 59
## 3 1249 closed 2019-02-12 2019-03-14 109
## 4 1666 closed 2019-08-21 2019-11-01 77
## 5 4321 closed 2022-04-15 2022-12-08 69
## 6 4591 closed 2022-07-18 2022-08-09 50
## review_labels
## 1 accepted,recommend-accept,published
## 2 accepted,TeX,Shell,Python,recommend-accept,published
## 3 accepted,recommend-accept,published
## 4 accepted,recommend-accept,published
## 5 accepted,TeX,Python,Jupyter Notebook,recommend-accept,published,Track: 1 (AASS)
## 6 accepted,Shell,CMake,recommend-accept,published,Dockerfile,EuroPar
## prerev_title
## 1 biotmle: Targeted Learning for Biomarker Discovery
## 2 CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 3 dfitspy
## 4 UncertainData.jl: a Julia package for working with measurements and datasets with uncertainties
## 5 PyNM: a Lightweight Python implementation of Normative Modeling
## 6 Mallob: Scalable SAT Solving On Demand With Decentralized Job Scheduling
## prerev_state prerev_opened prerev_closed prerev_ncomments
## 1 closed 2017-01-09 2017-06-14 27
## 2 closed 2021-11-12 2021-11-16 29
## 3 closed 2019-01-29 2019-02-12 44
## 4 closed 2019-08-16 2019-08-21 58
## 5 closed 2022-03-17 2022-04-15 25
## 6 closed 2022-07-01 2022-07-18 19
## prerev_labels days_in_pre days_in_rev to_review
## 1 rOpenSci 156 days 42 days TRUE
## 2 TeX,Shell,Python 4 days 16 days TRUE
## 3 Python,Makefile,HTML 14 days 30 days TRUE
## 4 TeX,Julia 5 days 72 days TRUE
## 5 TeX,Python,Jupyter Notebook,Track: 1 (AASS) 29 days 237 days TRUE
## 6 Shell,CMake,Dockerfile,EuroPar 17 days 22 days TRUE
## repo_created repo_updated repo_pushed repo_nbr_stars repo_language
## 1 2016-08-16 2021-10-14 2021-10-14 4 R
## 2 2020-12-22 2023-02-06 2022-10-14 54 Jupyter Notebook
## 3 2018-09-30 2022-03-31 2020-07-24 3 Python
## 4 2018-12-02 2021-11-19 2021-11-19 15 Julia
## 5 2019-06-03 2023-02-04 2022-12-08 17 Jupyter Notebook
## 6 2019-10-02 2023-01-13 2023-01-20 33 C++
## repo_languages_bytes
## 1 R:33321,TeX:5137,Makefile:763
## 2 Jupyter Notebook:1232323,Python:623505,TeX:18209,Shell:187
## 3 Python:51234,Makefile:1216,TeX:748,HTML:142
## 4 Julia:684786,TeX:2362
## 5 Jupyter Notebook:9847829,Python:111364,TeX:30647
## 6 C++:2272704,Python:86988,Shell:71689,C:7999,CMake:5923,Roff:2020,Dockerfile:766
## repo_topics
## 1 bioinformatics,biostatistics,bioconductor,statistics,machine-learning,causal-inference,r,bioconductor-packages,bioconductor-package,targeted-learning,biomarker-discovery,computational-biology
## 2 sparse-representations,jax,wavelets,convex-optimization,linear-operators,compressive-sensing,functional-programming,l1-regularization,sparse-linear-systems,lasso,sparse-bayesian-learning,basis-pursuit
## 3
## 4
## 5
## 6
## repo_license repo_nbr_contribs repo_nbr_contribs_2ormore repo_info_obtained
## 1 other 5 5 2023-02-15
## 2 apache-2.0 2 1 2023-02-08
## 3 gpl-3.0 2 1 2023-01-18
## 4 mit 5 2 2023-01-11
## 5 bsd-3-clause 4 4 2023-02-15
## 6 lgpl-3.0 3 2 2023-01-25
## published.date halfyear nbr_authors
## 1 2017-07-26 2017H2 3
## 2 2021-12-02 2021H2 1
## 3 2019-03-14 2019H1 1
## 4 2019-11-01 2019H2 1
## 5 2022-12-08 2022H2 2
## 6 2022-08-09 2022H2 2
saveRDS(papers, file = "joss_submission_analytics.rds")

To read the current version of this file directly from GitHub, use the following code:
papers <- readRDS(gzcon(url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_analytics.rds?raw=true")))

sessionInfo()

## R version 4.2.2 (2022-10-31)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS Big Sur ... 10.16
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] readr_2.1.4 citecorp_0.3.0 plotly_4.10.1 DT_0.27
## [5] jsonlite_1.8.4 purrr_1.0.1 gh_1.3.1 lubridate_1.9.2
## [9] ggplot2_3.4.1 tidyr_1.3.0 dplyr_1.1.0 rcrossref_1.2.009
## [13] tibble_3.1.8
##
## loaded via a namespace (and not attached):
## [1] nlme_3.1-160 bit64_4.0.5 RColorBrewer_1.1-3 httr_1.4.4
## [5] tools_4.2.2 bslib_0.4.2 utf8_1.2.3 R6_2.5.1
## [9] lazyeval_0.2.2 mgcv_1.8-41 colorspace_2.1-0 withr_2.5.0
## [13] tidyselect_1.2.0 gridExtra_2.3 bit_4.0.5 curl_5.0.0
## [17] compiler_4.2.2 cli_3.6.0 xml2_1.3.3 labeling_0.4.2
## [21] triebeard_0.3.0 sass_0.4.5 scales_1.2.1 stringr_1.5.0
## [25] digest_0.6.31 rmarkdown_2.20 pkgconfig_2.0.3 htmltools_0.5.4
## [29] fastmap_1.1.0 highr_0.10 htmlwidgets_1.6.1 rlang_1.0.6
## [33] httpcode_0.3.0 shiny_1.7.4 jquerylib_0.1.4 generics_0.1.3
## [37] farver_2.1.1 crosstalk_1.2.0 vroom_1.6.1 magrittr_2.0.3
## [41] wordcloud_2.6 fauxpas_0.5.0 Matrix_1.5-1 Rcpp_1.0.10
## [45] munsell_0.5.0 fansi_1.0.4 viridis_0.6.2 lifecycle_1.0.3
## [49] stringi_1.7.12 whisker_0.4.1 yaml_2.3.7 plyr_1.8.8
## [53] grid_4.2.2 parallel_4.2.2 promises_1.2.0.1 crayon_1.5.2
## [57] miniUI_0.1.1.1 lattice_0.20-45 splines_4.2.2 hms_1.1.2
## [61] knitr_1.42 pillar_1.8.1 crul_1.3 glue_1.6.2
## [65] evaluate_0.20 data.table_1.14.6 vctrs_0.5.2 tzdb_0.3.0
## [69] httpuv_1.6.9 urltools_1.7.3 gtable_0.3.1 cachem_1.0.6
## [73] xfun_0.37 mime_0.12 xtable_1.8-4 gitcreds_0.1.2
## [77] later_1.3.0 viridisLite_0.4.1 timechange_0.2.0 ellipsis_0.3.2